return do_domctl(xc_handle, &domctl);
}
+/*
+ * Query the hypervisor for the number of bytes currently free in the
+ * heap, optionally restricted to an address-width range and a single
+ * node.  Thin wrapper around the XEN_SYSCTL_availheap sysctl.
+ *
+ * min_width/max_width: address widths in bits (0 = don't care).
+ * node: node of interest (-1 = all nodes).
+ * bytes: out parameter; written only when the sysctl succeeds.
+ * Returns the sysctl return code (0 on success, <0 on failure).
+ */
+int xc_availheap(int xc_handle,
+                 int min_width,
+                 int max_width,
+                 int node,
+                 uint64_t *bytes)
+{
+    DECLARE_SYSCTL;
+    int rc;
+
+    sysctl.cmd = XEN_SYSCTL_availheap;
+    sysctl.u.availheap.min_bitwidth = min_width;
+    sysctl.u.availheap.max_bitwidth = max_width;
+    sysctl.u.availheap.node = node;
+
+    rc = xc_sysctl(xc_handle, &sysctl);
+
+    /*
+     * Only report a result on success: on failure the OUT field was never
+     * filled in by the hypervisor, so copying it out would hand the caller
+     * uninitialized stack contents.
+     */
+    if ( rc == 0 )
+        *bytes = sysctl.u.availheap.avail_bytes;
+
+    return rc;
+}
+
int xc_vcpu_setcontext(int xc_handle,
uint32_t domid,
uint32_t vcpu,
/* Get current total pages allocated to a domain. */
long xc_get_tot_pages(int xc_handle, uint32_t domid);
+/**
+ * This function retrieves the number of bytes available
+ * in the heap in a specific range of address widths and nodes.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm min_width the smallest address width to query (0 if don't care)
+ * @parm max_width the largest address width to query (0 if don't care)
+ * @parm node the node to query (-1 for all)
+ * @parm *bytes out parameter receiving the total bytes counted
+ * @return 0 on success, <0 on failure.
+ */
+int xc_availheap(int xc_handle, int min_width, int max_width, int node,
+ uint64_t *bytes);
/*
* Trace Buffer Operations
put_domain(d);
}
+/*
+ * Count free domain-heap pages in the zones whose page addresses fit
+ * within [min_width, max_width] bits, restricted to @node as understood
+ * by avail_heap_pages().  A width of zero means "no bound on that side".
+ */
+unsigned long avail_domheap_pages_region(
+ unsigned int node, unsigned int min_width, unsigned int max_width)
+{
+ int zone_lo, zone_hi;
+
+ /*
+ * Map an address width (in bits) to its zone index: zone =
+ * width - (PAGE_SHIFT + 1).  Each bound is then clamped into the
+ * domheap zone range (MEMZONE_XEN + 1 .. NR_ZONES - 1) so callers
+ * cannot reach the Xen-private zone or run off the zone array.
+ */
+ zone_lo = min_width ? (min_width - (PAGE_SHIFT + 1)) : (MEMZONE_XEN + 1);
+ zone_lo = max_t(int, MEMZONE_XEN + 1, zone_lo);
+ zone_lo = min_t(int, NR_ZONES - 1, zone_lo);
+
+ zone_hi = max_width ? (max_width - (PAGE_SHIFT + 1)) : (NR_ZONES - 1);
+ zone_hi = max_t(int, MEMZONE_XEN + 1, zone_hi);
+ zone_hi = min_t(int, NR_ZONES - 1, zone_hi);
+
+ return avail_heap_pages(zone_lo, zone_hi, node);
+}
unsigned long avail_domheap_pages(void)
{
    /*
     * Total free pages in the domain heap: the sum of the two free-page
     * counters (avail_nrm/avail_dma, maintained elsewhere in this file).
     */
    return avail_nrm + avail_dma;
}
-unsigned long avail_nodeheap_pages(int node)
-{
- return avail_heap_pages(0, NR_ZONES - 1, node);
-}
-
static void pagealloc_keyhandler(unsigned char key)
{
unsigned int zone = MEMZONE_XEN;
#include <xen/keyhandler.h>
#include <asm/current.h>
#include <public/sysctl.h>
+#include <asm/numa.h>
+#include <xen/nodemask.h>
extern long arch_do_sysctl(
struct xen_sysctl *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
}
break;
+    case XEN_SYSCTL_availheap:
+    {
+        /*
+         * Count free domheap pages matching the caller's node and
+         * address-width constraints, then convert pages to bytes for
+         * the OUT field.
+         */
+        op->u.availheap.avail_bytes = avail_domheap_pages_region(
+            op->u.availheap.node,
+            op->u.availheap.min_bitwidth,
+            op->u.availheap.max_bitwidth);
+        op->u.availheap.avail_bytes <<= PAGE_SHIFT;
+
+        /* Copy the updated op (with avail_bytes filled in) back to the guest. */
+        ret = copy_to_guest(u_sysctl, op, 1) ? -EFAULT : 0;
+    }
+    break;
+
default:
ret = arch_do_sysctl(op, u_sysctl);
break;
typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
+/* XEN_SYSCTL_availheap: query bytes free in the heap, with optional
+ * address-width and node constraints. */
+#define XEN_SYSCTL_availheap 9
+struct xen_sysctl_availheap {
+    /* IN variables. */
+    uint32_t min_bitwidth; /* Smallest address width, in bits (zero if don't care). */
+    uint32_t max_bitwidth; /* Largest address width, in bits (zero if don't care). */
+    int32_t node; /* NUMA node of interest (-1 for all nodes). */
+    /* OUT variables. */
+    uint64_t avail_bytes; /* Bytes available in the specified region. */
+};
+typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
+
struct xen_sysctl {
uint32_t cmd;
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
struct xen_sysctl_getdomaininfolist getdomaininfolist;
struct xen_sysctl_debug_keys debug_keys;
struct xen_sysctl_getcpuinfo getcpuinfo;
+ struct xen_sysctl_availheap availheap;
uint8_t pad[128];
} u;
};
struct domain *d, unsigned int cpu, unsigned int order,
unsigned int memflags);
void free_domheap_pages(struct page_info *pg, unsigned int order);
+unsigned long avail_domheap_pages_region(
+ unsigned int node, unsigned int min_width, unsigned int max_width);
unsigned long avail_domheap_pages(void);
#define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
#define free_domheap_page(p) (free_domheap_pages(p,0))